% @file pfqn_lcfsqn_mva.m
% @brief Exact MVA for 2-station LCFS queueing networks.
%
% @fn pfqn_lcfsqn_mva(alpha, beta, N)
% @param alpha Inverse service rates (1/mu) at the LCFS station (1xR vector).
% @param beta Inverse service rates (1/mu) at the LCFS-PR station (1xR vector).
% @param N Population vector (default: ones(1,R)).
% @return T Throughput vector.
% @return Q Mean queue lengths (2xR matrix).
% @return U Utilization (2xR matrix).
% @return B Back probability matrix (2xR).
function [T,Q,U,B] = pfqn_lcfsqn_mva(alpha, beta, N)
% [T,Q,U,B] = PFQN_LCFSQN_MVA(ALPHA, BETA, N)
% Mean Value Analysis for multiclass LCFS queueing networks
%
% This function computes performance metrics for a 2-station closed
% queueing network with:
% - Station 1: LCFS (Last-Come-First-Served, non-preemptive)
% - Station 2: LCFS-PR (LCFS with Preemption-Resume)
%
% Inputs:
%   alpha - vector of inverse service rates at station 1 (LCFS)
%           alpha(r) = 1/mu(1,r) for class r
%   beta  - vector of inverse service rates at station 2 (LCFS-PR)
%           beta(r) = 1/mu(2,r) for class r
%   N     - population vector, N(r) = number of jobs of class r
%           (default: ones(1,R) - one job per class)
%
% Outputs:
%   T - throughput vector, T(r) = throughput of class r
%   Q - queue length matrix, Q(i,r) = mean queue length at station i, class r
%   U - utilization matrix, U(i,r) = utilization at station i, class r
%   B - back probability matrix, B(i,r) = probability class r job is at
%       back of queue at station i
%
% Note: This implementation uses log-space arithmetic to prevent numerical
% underflow. The results are mathematically exact (up to floating-point
% precision) - no approximations are made.
%
% Reference:
%   G. Casale, "A family of multiclass LCFS queueing networks with
%   order-dependent product-form solutions", QUESTA 2026.
%
% Copyright (c) 2012-2026, Imperial College London
%
% NOTE(review): this view of the file is extraction-damaged. Several
% original lines (argument defaulting, loop headers such as the iteration
% over population vectors, and matching `end` statements) were dropped.
% Surviving statements are preserved verbatim below with the gaps marked;
% verify structure against the original source before editing.

% NOTE(review): lines defining R and defaulting N (presumably
% `R = length(alpha); if nargin < 3, N = ones(1,R); end`) are missing
% from this view - confirm against the original.

% Precompute log values for numerical stability
log_alpha = log(alpha);
% NOTE(review): `log_beta = log(beta);` (used below) and the `for r = 1:R`
% header enclosing the next statement are missing from this view.
prods(r) = prod(N(1:r-1) + 1);

nstates = prod(N + 1);
QN = cell(1, nstates);
TN = cell(1, nstates);
% Store B in log-scaled form to prevent underflow:
%   B_actual = BN_scaled * exp(log_scale_BN)
% This representation is EXACT - no approximation.
BN_scaled = cell(1, nstates);
log_scale_BN = cell(1, nstates);
% NOTE(review): the loop header over ci__ (cell initialization) is missing.
QN{ci__} = zeros(2, R);
TN{ci__} = zeros(1, R);
BN_scaled{ci__} = zeros(2, R);
log_scale_BN{ci__} = -inf(2, R);
idx = 1; % pre-initialize for codegen

% NOTE(review): the loop over population vectors n (and its `end`) is
% missing from this view; `hashpop`/`oner` are external helper functions.
idx = hashpop(n, N, R, prods);
QN{idx} = zeros(2, R);
BN_scaled{idx} = zeros(2, R);
log_scale_BN{idx} = -inf(2, R); % -inf represents 0 in log-space
TN{idx} = zeros(1, R);

% Base case: single job - no scaling needed
denom = alpha(r) + beta(r);
QN{idx}(1, r) = alpha(r) / denom;
QN{idx}(2, r) = beta(r) / denom;
% At n=1, B = Q directly (no prod(alpha.^n) factor)
BN_scaled{idx}(1, r) = alpha(r) / denom;
BN_scaled{idx}(2, r) = beta(r) / denom;
log_scale_BN{idx}(1, r) = 0; % exp(0) = 1, so actual = scaled
log_scale_BN{idx}(2, r) = 0;
TN{idx}(1, r) = 1 / denom;

% Recursive case: multiple jobs
% log_scale_np = log(prod(alpha.^n)) - can be very negative
log_scale_np = sum(n .* log_alpha);
% NOTE(review): the `for k = ...` header enclosing this section is missing.
idx_k = hashpop(oner(n, k), N, R, prods);
% Compute unscaled waiting time contributions
% These are O(1) and don't underflow
Wnp_unscaled = 1 + QN{idx_k}(1, k);
Wpr_unscaled = 1 + QN{idx_k}(2, k);
% NOTE(review): the inner `for r = ...` header is missing here.
if r == k; continue; end
idx_r = hashpop(oner(n, r), N, R, prods);

% Compute B ratio in log-space:
%   ratio = B_{n-e_k}(1,r) / B_{n-e_r}(1,k)
% The scaling factors largely cancel in ratios
if BN_scaled{idx_k}(1, r) > 0 && BN_scaled{idx_r}(1, k) > 0
    log_ratio_np = log(BN_scaled{idx_k}(1, r)) + log_scale_BN{idx_k}(1, r) ...
        - log(BN_scaled{idx_r}(1, k)) - log_scale_BN{idx_r}(1, k);
    ratio_np = exp(log_ratio_np);
    Wnp_unscaled = Wnp_unscaled + (alpha(k) / alpha(r)) * ratio_np * QN{idx_r}(1, k);
% NOTE(review): the `end` closing the previous `if` is missing from this view.
if BN_scaled{idx_k}(2, r) > 0 && BN_scaled{idx_r}(2, k) > 0
    log_ratio_pr = log(BN_scaled{idx_k}(2, r)) + log_scale_BN{idx_k}(2, r) ...
        - log(BN_scaled{idx_r}(2, k)) - log_scale_BN{idx_r}(2, k);
    ratio_pr = exp(log_ratio_pr);
    Wpr_unscaled = Wpr_unscaled + (alpha(r) / alpha(k)) * ratio_pr * QN{idx_r}(2, k);

% log_scale_pr_k = log(alpha(k)^(sum(n)-1) * beta(k))
log_scale_pr_k = (sum(n) - 1) * log_alpha(k) + log_beta(k);

% Compute Y(k) = n(k) / (Wnp + Wpr) using log-sum-exp
%   Wnp = exp(log_scale_np) * Wnp_unscaled
%   Wpr = exp(log_scale_pr_k) * Wpr_unscaled
log_Wnp = log_scale_np + log(Wnp_unscaled);
log_Wpr = log_scale_pr_k + log(Wpr_unscaled);

% log-sum-exp for numerical stability
max_log = max(log_Wnp, log_Wpr);
log_sum_W = max_log + log(exp(log_Wnp - max_log) + exp(log_Wpr - max_log));

% B(1,k) = prod(alpha.^n) * n(k) / (Wnp + Wpr)
%        = n(k) * exp(log_scale_np - log_sum_W)
BN_scaled{idx}(1, k) = n(k);
log_scale_BN{idx}(1, k) = log_scale_np - log_sum_W;

% B(2,k) = alpha(k)^(sum(n)-1) * beta(k) * n(k) / (Wnp + Wpr)
%        = n(k) * exp(log_scale_pr_k - log_sum_W)
BN_scaled{idx}(2, k) = n(k);
log_scale_BN{idx}(2, k) = log_scale_pr_k - log_sum_W;

% Obtain queue lengths: Q(i,k) = B(i,k) + sum_r B(i,r) * Q_{n-e_r}(i,k)
% Accumulate terms using log-sum-exp
% Each term: B_actual(i,r) * Q_{n-e_r}(i,k)
%          = BN_scaled(i,r) * exp(log_scale_BN(i,r)) * Q_{n-e_r}(i,k)
% NOTE(review): the `for station = 1:2` / `for k = ...` headers and the
% `terms = []; log_scales = [];` initialization are missing from this view.

% First term: B(station,k)
if BN_scaled{idx}(station, k) > 0
    terms(end+1) = BN_scaled{idx}(station, k);
    log_scales(end+1) = log_scale_BN{idx}(station, k);

% Sum over r: B(station,r) * Q_{n-e_r}(station,k)
% NOTE(review): the enclosing `for r = ...` header is missing here.
idx_r = hashpop(oner(n, r), N, R, prods);
if BN_scaled{idx}(station, r) > 0 && QN{idx_r}(station, k) > 0
    terms(end+1) = BN_scaled{idx}(station, r) * QN{idx_r}(station, k);
    log_scales(end+1) = log_scale_BN{idx}(station, r);

QN{idx}(station, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);

% NOTE(review): loop header for k and `Unp = 0;` initialization are missing.
idx_k = hashpop(oner(n, k), N, R, prods);
% Compute Unp = sum_r alpha(r) * T_{n-e_k}(r)
Unp = Unp + alpha(r) * TN{idx_k}(1, r);

% T(k) = sum_r B(1,r) * T_{n-e_r}(k) + (1/alpha(k)) * B(1,k) * (1-Unp)
idx_r = hashpop(oner(n, r), N, R, prods);
if BN_scaled{idx}(1, r) > 0 && TN{idx_r}(1, k) > 0
    terms(end+1) = BN_scaled{idx}(1, r) * TN{idx_r}(1, k);
    log_scales(end+1) = log_scale_BN{idx}(1, r);

% Add (1/alpha(k)) * B(1,k) * (1-Unp) term
if BN_scaled{idx}(1, k) > 0 && (1 - Unp) > 0
    terms(end+1) = (1 / alpha(k)) * BN_scaled{idx}(1, k) * (1 - Unp);
    log_scales(end+1) = log_scale_BN{idx}(1, k);

TN{idx}(1, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);

% Recover actual B values from scaled representation
%   B_actual = BN_scaled * exp(log_scale_BN)
% NOTE(review): the final `idx = hashpop(N, ...)` lookup, output
% preallocation (T, Q, U, B), and the `for r = 1:R` loop header are
% missing from this view.
if BN_scaled{idx}(1, r) > 0
    B(1, r) = BN_scaled{idx}(1, r) * exp(log_scale_BN{idx}(1, r));
if BN_scaled{idx}(2, r) > 0
    B(2, r) = BN_scaled{idx}(2, r) * exp(log_scale_BN{idx}(2, r));
% Utilization law: U(i,r) = T(r) * (mean service demand at station i)
U(1, r) = T(r) * alpha(r);
U(2, r) = T(r) * beta(r);
function result = pfqn_lcfsqn_mva_logsumexp(terms, log_scales)
% PFQN_LCFSQN_MVA_LOGSUMEXP Compute sum of scaled terms using log-sum-exp.
%
% Computes: result = sum_i (terms(i) * exp(log_scales(i)))
%
% Inputs:
%   terms      - vector of scaled values; entries <= 0 are treated as
%                zero contributions and skipped
%   log_scales - vector of log-scale factors; -Inf represents a zero
%                contribution and is skipped
% Output:
%   result     - the sum above, computed by factoring out the maximum
%                exponent. This is mathematically EXACT - we're just
%                computing the sum in a numerically stable way.

% Filter out zero/negative terms and non-finite scales: log() of these
% would give -Inf or complex values. Both vectors must be filtered
% consistently so they stay the same length.
valid = terms > 0 & isfinite(log_scales);
if ~any(valid)
    % All contributions are zero - the sum is exactly 0.
    result = 0;
    return;
end
terms = terms(valid);
log_scales = log_scales(valid);

% Log of each term: log(term * exp(log_scale)) = log(term) + log_scale
log_values = log(terms) + log_scales;

% Find maximum for numerical stability
max_log = max(log_values);

% log-sum-exp: log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x))))
% So: sum(exp(x)) = exp(max(x)) * sum(exp(x - max(x)))
result = exp(max_log) * sum(exp(log_values - max_log));