% @file pfqn_lcfsqn_mva.m
% @brief Exact MVA for 2-station LCFS queueing networks.
%
% @brief Exact MVA for 2-station LCFS queueing networks.
% @fn pfqn_lcfsqn_mva(alpha, beta, N)
% @param alpha Inverse service rates (mean service times) at the LCFS station (1xR vector).
% @param beta Inverse service rates (mean service times) at the LCFS-PR station (1xR vector).
% @param N Population vector (default: ones(1,R)).
% @return T Throughput vector.
% @return Q Mean queue lengths (2xR matrix).
% @return U Utilization (2xR matrix).
% @return B Back probability matrix (2xR).
function [T,Q,U,B] = pfqn_lcfsqn_mva(alpha, beta, N)
% [T,Q,U,B] = PFQN_LCFSQN_MVA(ALPHA, BETA, N)
% Mean Value Analysis for multiclass LCFS queueing networks
%
% This function computes performance metrics for a 2-station closed
% queueing network with:
%   - Station 1: LCFS (Last-Come-First-Served, non-preemptive)
%   - Station 2: LCFS-PR (LCFS with Preemption-Resume)
%
% alpha - vector of inverse service rates at station 1 (LCFS),
%         alpha(r) = 1/mu(1,r) for class r
% beta  - vector of inverse service rates at station 2 (LCFS-PR),
%         beta(r) = 1/mu(2,r) for class r
% N     - population vector, N(r) = number of jobs of class r
%         (default: ones(1,R) - one job per class)
%
% T - throughput vector, T(r) = throughput of class r
% Q - queue length matrix, Q(i,r) = mean queue length at station i, class r
% U - utilization matrix, U(i,r) = utilization at station i, class r
% B - back probability matrix, B(i,r) = probability that a class-r job is at
%     the back of the queue at station i
%
% Note: This implementation uses log-space arithmetic to prevent numerical
% underflow. The results are mathematically exact (up to floating-point
% precision) - no approximations are made.
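%
% Example (illustrative parameter values only, not taken from the reference):
%   alpha = [1.0 0.5];      % mean service times at the LCFS station
%   beta  = [2.0 0.25];     % mean service times at the LCFS-PR station
%   N     = [2 3];          % two class-1 jobs, three class-2 jobs
%   [T,Q,U,B] = pfqn_lcfsqn_mva(alpha, beta, N);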
%
% G. Casale, "A family of multiclass LCFS queueing networks with
% order-dependent product-form solutions", QUESTA 2026.
%
% Copyright (c) 2012-2026, Imperial College London
% Precompute log values for numerical stability
log_alpha = log(alpha);
log_beta = log(beta); % used below when scaling the LCFS-PR terms
    prods(r) = prod(N(1:r-1) + 1);

QN = cell(1, prod(N + 1));
TN = cell(1, prod(N + 1));
% Store B in log-scaled form to prevent underflow:
%   B_actual = BN_scaled * exp(log_scale_BN)
% This representation is EXACT - no approximation.
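% For example (illustrative magnitudes only): a true value of 2*exp(-800),
% which lies far below realmin (about 2.2e-308) and would underflow if stored
% directly, is kept as BN_scaled = 2 with log_scale_BN = -800.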
BN_scaled = cell(1, prod(N + 1));
log_scale_BN = cell(1, prod(N + 1));

    idx = hashpop(n, N, R, prods);
    QN{idx} = zeros(2, R);
    BN_scaled{idx} = zeros(2, R);
    log_scale_BN{idx} = -inf(2, R); % -inf represents 0 in log-space
    TN{idx} = zeros(1, R);

    % Base case: single job - no scaling needed
    denom = alpha(r) + beta(r);
    QN{idx}(1, r) = alpha(r) / denom;
    QN{idx}(2, r) = beta(r) / denom;
    % At n=1, B = Q directly (no prod(alpha.^n) factor)
    BN_scaled{idx}(1, r) = alpha(r) / denom;
    BN_scaled{idx}(2, r) = beta(r) / denom;
    log_scale_BN{idx}(1, r) = 0; % exp(0) = 1, so actual = scaled
    log_scale_BN{idx}(2, r) = 0;
    TN{idx}(1, r) = 1 / denom;
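    % Sanity check with made-up numbers (not from the source): if alpha(r) = 2
    % and beta(r) = 3 then denom = 5, so Q(1,r) = 0.4, Q(2,r) = 0.6 and
    % T(r) = 0.2, i.e. the single job splits its cycle time between the two
    % stations in proportion to their mean service times.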
    % Recursive case: multiple jobs
    % log_scale_np = log(prod(alpha.^n)) - can be very negative
    log_scale_np = sum(n .* log_alpha);

    idx_k = hashpop(oner(n, k), N, R, prods);

    % Compute unscaled waiting time contributions
    % These are O(1) and don't underflow
    Wnp_unscaled = 1 + QN{idx_k}(1, k);
    Wpr_unscaled = 1 + QN{idx_k}(2, k);

    for r = setdiff(1:R, k)
        idx_r = hashpop(oner(n, r), N, R, prods);

        % Compute B ratio in log-space:
        %   ratio = B_{n-e_k}(1,r) / B_{n-e_r}(1,k)
        % The scaling factors largely cancel in ratios
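        % (e.g., if both log_scale_BN values sit around -500, only their O(1)
        % difference is exponentiated, which stays well within double range
        % even though exp(-500) on its own is vanishingly small)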
        if BN_scaled{idx_k}(1, r) > 0 && BN_scaled{idx_r}(1, k) > 0
            log_ratio_np = log(BN_scaled{idx_k}(1, r)) + log_scale_BN{idx_k}(1, r) ...
                - log(BN_scaled{idx_r}(1, k)) - log_scale_BN{idx_r}(1, k);
            ratio_np = exp(log_ratio_np);
            Wnp_unscaled = Wnp_unscaled + (alpha(k) / alpha(r)) * ratio_np * QN{idx_r}(1, k);

        if BN_scaled{idx_k}(2, r) > 0 && BN_scaled{idx_r}(2, k) > 0
            log_ratio_pr = log(BN_scaled{idx_k}(2, r)) + log_scale_BN{idx_k}(2, r) ...
                - log(BN_scaled{idx_r}(2, k)) - log_scale_BN{idx_r}(2, k);
            ratio_pr = exp(log_ratio_pr);
            Wpr_unscaled = Wpr_unscaled + (alpha(r) / alpha(k)) * ratio_pr * QN{idx_r}(2, k);
    % log_scale_pr_k = log(alpha(k)^(sum(n)-1) * beta(k))
    log_scale_pr_k = (sum(n) - 1) * log_alpha(k) + log_beta(k);

    % Compute Y(k) = n(k) / (Wnp + Wpr) using log-sum-exp
    %   Wnp = exp(log_scale_np) * Wnp_unscaled
    %   Wpr = exp(log_scale_pr_k) * Wpr_unscaled
    log_Wnp = log_scale_np + log(Wnp_unscaled);
    log_Wpr = log_scale_pr_k + log(Wpr_unscaled);

    % log-sum-exp for numerical stability
    max_log = max(log_Wnp, log_Wpr);
    log_sum_W = max_log + log(exp(log_Wnp - max_log) + exp(log_Wpr - max_log));
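    % For instance (illustrative numbers only): if log_Wnp = -800 and
    % log_Wpr = -805, exp() of either would underflow to 0, but
    % log_sum_W = -800 + log(1 + exp(-5)) is approximately -799.993 and is
    % obtained without ever forming the underflowing quantities.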
    % B(1,k) = prod(alpha.^n) * n(k) / (Wnp + Wpr)
    %        = n(k) * exp(log_scale_np - log_sum_W)
    BN_scaled{idx}(1, k) = n(k);
    log_scale_BN{idx}(1, k) = log_scale_np - log_sum_W;

    % B(2,k) = alpha(k)^(sum(n)-1) * beta(k) * n(k) / (Wnp + Wpr)
    %        = n(k) * exp(log_scale_pr_k - log_sum_W)
    BN_scaled{idx}(2, k) = n(k);
    log_scale_BN{idx}(2, k) = log_scale_pr_k - log_sum_W;

% Obtain queue lengths: Q(i,k) = B(i,k) + sum_r B(i,r) * Q_{n-e_r}(i,k)

    % Accumulate terms using log-sum-exp
    % Each term: B_actual(i,r) * Q_{n-e_r}(i,k)
    %          = BN_scaled(i,r) * exp(log_scale_BN(i,r)) * Q_{n-e_r}(i,k)

    % First term: B(station,k)
    if BN_scaled{idx}(station, k) > 0
        terms(end+1) = BN_scaled{idx}(station, k);
        log_scales(end+1) = log_scale_BN{idx}(station, k);

    % Sum over r: B(station,r) * Q_{n-e_r}(station,k)
        idx_r = hashpop(oner(n, r), N, R, prods);
        if BN_scaled{idx}(station, r) > 0 && QN{idx_r}(station, k) > 0
            terms(end+1) = BN_scaled{idx}(station, r) * QN{idx_r}(station, k);
            log_scales(end+1) = log_scale_BN{idx}(station, r);

    QN{idx}(station, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);
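    % Note: the helper returns the fully rescaled sum, so QN stores actual
    % (unscaled) mean queue lengths; B is the only quantity kept in the
    % split scaled form.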
    idx_k = hashpop(oner(n, k), N, R, prods);

    % Compute Unp = sum_r alpha(r) * T_{n-e_k}(r)
        Unp = Unp + alpha(r) * TN{idx_k}(1, r);

    % T(k) = sum_r B(1,r) * T_{n-e_r}(k) + (1/alpha(k)) * B(1,k) * (1-Unp)
        idx_r = hashpop(oner(n, r), N, R, prods);
        if BN_scaled{idx}(1, r) > 0 && TN{idx_r}(1, k) > 0
            terms(end+1) = BN_scaled{idx}(1, r) * TN{idx_r}(1, k);
            log_scales(end+1) = log_scale_BN{idx}(1, r);

    % Add (1/alpha(k)) * B(1,k) * (1-Unp) term
    if BN_scaled{idx}(1, k) > 0 && (1 - Unp) > 0
        terms(end+1) = (1 / alpha(k)) * BN_scaled{idx}(1, k) * (1 - Unp);
        log_scales(end+1) = log_scale_BN{idx}(1, k);

    TN{idx}(1, k) = pfqn_lcfsqn_mva_logsumexp(terms, log_scales);

% Recover actual B values from scaled representation
%   B_actual = BN_scaled * exp(log_scale_BN)
    if BN_scaled{idx}(1, r) > 0
        B(1, r) = BN_scaled{idx}(1, r) * exp(log_scale_BN{idx}(1, r));
    if BN_scaled{idx}(2, r) > 0
        B(2, r) = BN_scaled{idx}(2, r) * exp(log_scale_BN{idx}(2, r));

    U(1, r) = T(r) * alpha(r);
    U(2, r) = T(r) * beta(r);
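    % Utilizations follow the utilization law U(i,r) = T(r) * S(i,r), with the
    % per-class mean service times S given by alpha and beta.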
function result = pfqn_lcfsqn_mva_logsumexp(terms, log_scales)
% PFQN_LCFSQN_MVA_LOGSUMEXP Compute sum of scaled terms using log-sum-exp
%
% Computes: result = sum_i (terms(i) * exp(log_scales(i)))
%
% This is mathematically EXACT - we're just computing the sum in a
% numerically stable way by factoring out the maximum exponent.
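%
% Example (illustrative values only):
%   pfqn_lcfsqn_mva_logsumexp([2 3], [-10 -12])
%   returns 2*exp(-10) + 3*exp(-12) (about 1.09e-4), evaluated internally as
%   exp(max_log) times a sum of well-scaled exponentials.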
% Filter out zero or negative terms (which would give -Inf or complex in log)
valid = terms > 0 & isfinite(log_scales);
terms = terms(valid);
log_scales = log_scales(valid);

% Compute log of each term: log(term * exp(log_scale)) = log(term) + log_scale
log_values = log(terms) + log_scales;
% Find maximum for numerical stability
max_log = max(log_values);

% log-sum-exp: log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x))))
% So: sum(exp(x)) = exp(max(x)) * sum(exp(x - max(x)))
result = exp(max_log) * sum(exp(log_values - max_log));