LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
pfqn_lcfsqn_mva.m
%{
% @file pfqn_lcfsqn_mva.m
% @brief Exact MVA for 2-station LCFS queueing networks.
%}

%{
% @brief Exact MVA for 2-station LCFS queueing networks.
% @fn pfqn_lcfsqn_mva(alpha, beta, N)
% @param alpha Inverse service rates at the LCFS station, alpha(r)=1/mu(1,r) (1xR vector).
% @param beta Inverse service rates at the LCFS-PR station, beta(r)=1/mu(2,r) (1xR vector).
% @param N Population vector (default: ones(1,R)).
% @return T Throughput vector.
% @return Q Mean queue lengths (2xR matrix).
% @return U Utilization (2xR matrix).
% @return B Back probability matrix (2xR).
%}
function [T,Q,U,B] = pfqn_lcfsqn_mva(alpha, beta, N)
% [T,Q,U,B] = PFQN_LCFSQN_MVA(ALPHA, BETA, N)
% Mean Value Analysis for multiclass LCFS queueing networks
%
% This function computes performance metrics for a 2-station closed
% queueing network with:
%   - Station 1: LCFS (Last-Come-First-Served, non-preemptive)
%   - Station 2: LCFS-PR (LCFS with Preemption-Resume)
%
% Parameters:
%   alpha - vector of inverse service rates at station 1 (LCFS)
%           alpha(r) = 1/mu(1,r) for class r
%   beta  - vector of inverse service rates at station 2 (LCFS-PR)
%           beta(r) = 1/mu(2,r) for class r
%   N     - population vector, N(r) = number of jobs of class r
%           (default: ones(1,R) - one job per class)
%
% Returns:
%   T - throughput vector, T(r) = throughput of class r
%   Q - queue length matrix, Q(i,r) = mean queue length at station i, class r
%   U - utilization matrix, U(i,r) = utilization at station i, class r
%   B - back probability matrix, B(i,r) = probability class r job is at
%       back of queue at station i
%
% Note: This implementation uses log-space arithmetic to prevent numerical
% underflow. The results are mathematically exact (up to floating-point
% precision) - no approximations are made.
%
% NOTE(review): relies on the toolbox utilities PPROD (population-lattice
% enumerator), HASHPOP (population-to-linear-index hash) and ONER
% (decrement one class of a population vector), defined elsewhere in LINE.
% The recursion assumes pprod visits each population n only after every
% n - e_r has been visited - confirm against the pprod implementation.
%
% Reference:
%   G. Casale, "A family of multiclass LCFS queueing networks with
%   order-dependent product-form solutions", QUESTA 2026.
%
% Copyright (c) 2012-2026, Imperial College London
% All rights reserved.

R = length(alpha);      % number of job classes
if nargin < 3
    N = ones(1, R);     % default population: one job per class
end
K = sum(N);             % total number of jobs in the network

if K == 0
    % Empty network: all performance metrics are identically zero.
    Q = zeros(2, R);
    T = zeros(1, R);
    B = zeros(2, R);
    U = zeros(2, R);
    return
else
    % Precompute log values for numerical stability
    log_alpha = log(alpha);
    log_beta = log(beta);

    % Mixed-radix weights: prods(r) = prod_{s<r}(N(s)+1). Used by hashpop
    % to map each population vector 0 <= n <= N to a unique linear index.
    prods = zeros(1, R);
    for r = 1:R
        prods(r) = prod(N(1:r-1) + 1);
    end

    nstates = prod(N + 1);   % number of populations in the lattice
    QN = cell(1, nstates);   % QN{i}(s,r): mean queue lengths at population i
    TN = cell(1, nstates);   % TN{i}(r): class throughputs at population i
    % Store B in log-scaled form to prevent underflow:
    %   B_actual = BN_scaled * exp(log_scale_BN)
    % This representation is EXACT - no approximation.
    BN_scaled = cell(1, nstates);
    log_scale_BN = cell(1, nstates);
    for ci__=1:nstates
        QN{ci__} = zeros(2, R);
        TN{ci__} = zeros(1, R);
        BN_scaled{ci__} = zeros(2, R);
        log_scale_BN{ci__} = -inf(2, R);
    end

    idx = 1; % pre-initialize for codegen
    n = pprod(N);   % first population in pprod's enumeration order
    while n >= 0
        idx = hashpop(n, N, R, prods);
        QN{idx} = zeros(2, R);
        BN_scaled{idx} = zeros(2, R);
        log_scale_BN{idx} = -inf(2, R); % -inf represents 0 in log-space
        TN{idx} = zeros(1, R);

        if sum(n) == 1
            % Base case: single job - no scaling needed
            for r = 1:R
                if n(r) > 0
                    % A lone job alternates between the two stations; its
                    % time fractions are proportional to the mean demands.
                    denom = alpha(r) + beta(r);
                    QN{idx}(1, r) = alpha(r) / denom;
                    QN{idx}(2, r) = beta(r) / denom;
                    % At n=1, B = Q directly (no prod(alpha.^n) factor)
                    BN_scaled{idx}(1, r) = alpha(r) / denom;
                    BN_scaled{idx}(2, r) = beta(r) / denom;
                    log_scale_BN{idx}(1, r) = 0; % exp(0) = 1, so actual = scaled
                    log_scale_BN{idx}(2, r) = 0;
                    TN{idx}(1, r) = 1 / denom;   % one cycle per alpha(r)+beta(r) time units
                end
            end
        else
            % Recursive case: multiple jobs
            % log_scale_np = log(prod(alpha.^n)) - can be very negative
            log_scale_np = sum(n .* log_alpha);

            for k = 1:R
                if n(k) > 0
                    % Index of the reduced population n - e_k
                    idx_k = hashpop(oner(n, k), N, R, prods);

                    % Compute unscaled waiting time contributions
                    % These are O(1) and don't underflow
                    Wnp_unscaled = 1 + QN{idx_k}(1, k);
                    Wpr_unscaled = 1 + QN{idx_k}(2, k);

                    for r = 1:R
                        if r == k; continue; end
                        if n(r) > 0
                            idx_r = hashpop(oner(n, r), N, R, prods);

                            % Compute B ratio in log-space:
                            %   ratio = B_{n-e_k}(1,r) / B_{n-e_r}(1,k)
                            % The scaling factors largely cancel in ratios
                            % (guards skip terms whose B factors are zero,
                            % i.e. whose contribution is zero)
                            if BN_scaled{idx_k}(1, r) > 0 && BN_scaled{idx_r}(1, k) > 0
                                log_ratio_np = log(BN_scaled{idx_k}(1, r)) + log_scale_BN{idx_k}(1, r) ...
                                    - log(BN_scaled{idx_r}(1, k)) - log_scale_BN{idx_r}(1, k);
                                ratio_np = exp(log_ratio_np);
                                Wnp_unscaled = Wnp_unscaled + (alpha(k) / alpha(r)) * ratio_np * QN{idx_r}(1, k);
                            end

                            if BN_scaled{idx_k}(2, r) > 0 && BN_scaled{idx_r}(2, k) > 0
                                log_ratio_pr = log(BN_scaled{idx_k}(2, r)) + log_scale_BN{idx_k}(2, r) ...
                                    - log(BN_scaled{idx_r}(2, k)) - log_scale_BN{idx_r}(2, k);
                                ratio_pr = exp(log_ratio_pr);
                                Wpr_unscaled = Wpr_unscaled + (alpha(r) / alpha(k)) * ratio_pr * QN{idx_r}(2, k);
                            end
                        end
                    end

                    % log_scale_pr_k = log(alpha(k)^(sum(n)-1) * beta(k))
                    log_scale_pr_k = (sum(n) - 1) * log_alpha(k) + log_beta(k);

                    % Compute Y(k) = n(k) / (Wnp + Wpr) using log-sum-exp
                    %   Wnp = exp(log_scale_np) * Wnp_unscaled
                    %   Wpr = exp(log_scale_pr_k) * Wpr_unscaled
                    log_Wnp = log_scale_np + log(Wnp_unscaled);
                    log_Wpr = log_scale_pr_k + log(Wpr_unscaled);

                    % log-sum-exp for numerical stability
                    max_log = max(log_Wnp, log_Wpr);
                    log_sum_W = max_log + log(exp(log_Wnp - max_log) + exp(log_Wpr - max_log));

                    % B(1,k) = prod(alpha.^n) * n(k) / (Wnp + Wpr)
                    %        = n(k) * exp(log_scale_np - log_sum_W)
                    BN_scaled{idx}(1, k) = n(k);
                    log_scale_BN{idx}(1, k) = log_scale_np - log_sum_W;

                    % B(2,k) = alpha(k)^(sum(n)-1) * beta(k) * n(k) / (Wnp + Wpr)
                    %        = n(k) * exp(log_scale_pr_k - log_sum_W)
                    BN_scaled{idx}(2, k) = n(k);
                    log_scale_BN{idx}(2, k) = log_scale_pr_k - log_sum_W;
                end
            end

            % Obtain queue lengths: Q(i,k) = B(i,k) + sum_r B(i,r) * Q_{n-e_r}(i,k)
            for k = 1:R
                if n(k) > 0
                    % Accumulate terms using log-sum-exp
                    % Each term: B_actual(i,r) * Q_{n-e_r}(i,k)
                    %          = BN_scaled(i,r) * exp(log_scale_BN(i,r)) * Q_{n-e_r}(i,k)
                    for station = 1:2
                        terms = [];
                        log_scales = [];

                        % First term: B(station,k)
                        if BN_scaled{idx}(station, k) > 0
                            terms(end+1) = BN_scaled{idx}(station, k);
                            log_scales(end+1) = log_scale_BN{idx}(station, k);
                        end

                        % Sum over r: B(station,r) * Q_{n-e_r}(station,k)
                        for r = 1:R
                            if n(r) > 0
                                idx_r = hashpop(oner(n, r), N, R, prods);
                                if BN_scaled{idx}(station, r) > 0 && QN{idx_r}(station, k) > 0
                                    terms(end+1) = BN_scaled{idx}(station, r) * QN{idx_r}(station, k);
                                    log_scales(end+1) = log_scale_BN{idx}(station, r);
                                end
                            end
                        end

                        QN{idx}(station, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);
                    end
                end
            end

            % Obtain throughput
            for k = 1:R
                if n(k) > 0
                    idx_k = hashpop(oner(n, k), N, R, prods);

                    % Compute Unp = sum_r alpha(r) * T_{n-e_k}(r)
                    % (aggregate utilization of the LCFS station at population n-e_k)
                    Unp = 0;
                    for r = 1:R
                        Unp = Unp + alpha(r) * TN{idx_k}(1, r);
                    end

                    % T(k) = sum_r B(1,r) * T_{n-e_r}(k) + (1/alpha(k)) * B(1,k) * (1-Unp)
                    terms = [];
                    log_scales = [];

                    for r = 1:R
                        if n(r) > 0
                            idx_r = hashpop(oner(n, r), N, R, prods);
                            if BN_scaled{idx}(1, r) > 0 && TN{idx_r}(1, k) > 0
                                terms(end+1) = BN_scaled{idx}(1, r) * TN{idx_r}(1, k);
                                log_scales(end+1) = log_scale_BN{idx}(1, r);
                            end
                        end
                    end

                    % Add (1/alpha(k)) * B(1,k) * (1-Unp) term
                    if BN_scaled{idx}(1, k) > 0 && (1 - Unp) > 0
                        terms(end+1) = (1 / alpha(k)) * BN_scaled{idx}(1, k) * (1 - Unp);
                        log_scales(end+1) = log_scale_BN{idx}(1, k);
                    end

                    TN{idx}(1, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);
                end
            end
        end
        n = pprod(n, N);   % advance to the next population in the lattice
    end

    % On loop exit, idx hashes the last population visited - presumably
    % the full population N (verify against pprod's ordering), so these
    % cells hold the metrics for the complete network.
    Q = QN{idx};
    T = TN{idx};

    % Recover actual B values from scaled representation
    %   B_actual = BN_scaled * exp(log_scale_BN)
    B = zeros(2, R);
    for r = 1:R
        if BN_scaled{idx}(1, r) > 0
            B(1, r) = BN_scaled{idx}(1, r) * exp(log_scale_BN{idx}(1, r));
        end
        if BN_scaled{idx}(2, r) > 0
            B(2, r) = BN_scaled{idx}(2, r) * exp(log_scale_BN{idx}(2, r));
        end
    end

    % Utilization law: U(i,r) = T(r) * mean demand at station i.
    U = zeros(2, R);
    for r = 1:R
        U(1, r) = T(r) * alpha(r);
        U(2, r) = T(r) * beta(r);
    end
end
end
272
function result = pfqn_lcfsqn_mva_logsumexp(terms, log_scales)
% PFQN_LCFSQN_MVA_LOGSUMEXP Stable sum of log-scaled terms.
%
% Evaluates result = sum_i terms(i) * exp(log_scales(i)) without
% intermediate underflow or overflow, by moving each term to log-space
% and factoring out the largest exponent before summing. The value is
% exact up to floating-point rounding - no approximation is involved.

result = 0;
if isempty(terms)
    return;
end

% Zero/negative terms and -Inf scales represent zero contributions;
% they must be dropped before taking logarithms (log would yield -Inf
% or a complex value).
keep = (terms > 0) & isfinite(log_scales);
if ~any(keep)
    return;
end

% log of each surviving term: log(t * exp(s)) = log(t) + s
log_values = log(terms(keep)) + log_scales(keep);

% Factor out the peak exponent for stability:
%   sum(exp(x)) = exp(max(x)) * sum(exp(x - max(x)))
peak = max(log_values);
if isfinite(peak)
    result = exp(peak) * sum(exp(log_values - peak));
end
end