1function [G,lG] = pfqn_conv(L, N, Z, mu, ljcdscaling, ljcdcutoffs, options)
% [G,LG] = PFQN_CONV(L, N, Z, MU, LJCDSCALING, LJCDCUTOFFS, OPTIONS)
%
% Multichain convolution algorithm for closed queueing networks with
% Limited Joint Class Dependence (LJCD) service rates.
%
% Implements the convolution algorithm of Sauer (1983), Section 5.2,
% "Computational Algorithms for State-Dependent Queueing Networks",
% ACM TOCS, Vol. 1, No. 1, pp. 67-92.
%
% The algorithm computes G(N) = (X_1 * X_2 * ... * X_M)(N) where X_m(n)
% is the station factor at population vector n, and * denotes the
% multivariate discrete convolution:
%   A(n) = sum_{i: 0<=i<=n} B(i) * C(n-i)
%
% For LJCD stations, X_m(n) is computed recursively via Sauer eq. (40):
%   X_m(n) = (u_km / mu_km(n)) * X_m(n - e_k)
% where the LJCD scaling table provides the rate function mu_km(n).
%
% For standard (load-independent) stations, X_m(n) reduces to the
% multinomial form and the convolution uses the efficient recurrence:
%   G_m(n) = G_{m-1}(n) + sum_r L(m,r) * G_m(n - e_r)
% Input:
% L           - Service demand matrix (M x R)
% N           - Population vector (1 x R), must be finite (closed network)
% Z           - Think time vector (1 x R), or empty
% mu          - Load-dependent rate matrix (M x sum(N)), or empty for LI
% ljcdscaling - Cell array {M,1} where ljcdscaling{ist}{r} is the
%               linearized scaling table for station ist and class r
% ljcdcutoffs - Cell array {M,1} where ljcdcutoffs{ist} is the
%               per-class cutoffs vector [N1,...,NR] for station ist
% options     - Solver options (optional)
%
% Output:
% G  - Normalizing constant G(N)
% lG - log(G)
%
% Copyright (c) 2012-2026, Imperial College London
44if nargin < 3 || isempty(Z)
47if nargin < 4 || isempty(mu)
50if nargin < 5 || isempty(ljcdscaling)
51 ljcdscaling = cell(M, 1);
53if nargin < 6 || isempty(ljcdcutoffs)
54 ljcdcutoffs = cell(M, 1);
58 line_error(mfilename,
'Convolution algorithm requires finite (closed) populations.');
% Total state space size
62stateSpaceSize = prod(N + 1);
% Identify which stations have LJCD scaling
67 isLjcd(ist) = ~isempty(ljcdscaling{ist});
% --- Precompute X_m(n) tables for LJCD stations ---
% For LJCD stations, X_m(n) is built via Sauer eq. (40):
%   X_m(n) = (u_km / mu_km(n)) * X_m(n - e_k),   X_m(0) = 1
% where u_km = L(m,k) (relative utilization) and mu_km(n) is the
% LJCD service rate for chain k at station m with population n.
%
% The LJCD scaling table stores throughput values mu_km(n) (= service
% RATE at population n). From eq. (40), the recurrence factor is:
%   u_km / mu_km(n) = L(m,k) / ljcdscaling{m}{k}(n)
% where L(m,k) = r_km * S_km is the demand (visits * service time).
84 Xm{ist} = zeros(stateSpaceSize, 1);
85 Xm{ist}(1) = 1; % X_m(0) = 1
86 cutoffs = ljcdcutoffs{ist};
88 % Enumerate all population vectors and build X_m(n) recursively
95 % Use eq. (40): X_m(n) = (u_km / mu_km(n)) * X_m(n-e_k)
96 % Pick first class k with n(k) > 0
99 % Get service rate mu_km(n) from LJCD table
100 nClamped = min(n, cutoffs);
101 lidx = ljd_linearize(nClamped, cutoffs);
102 if r <= length(ljcdscaling{ist}) && ~isempty(ljcdscaling{ist}{r}) ...
103 && lidx <= length(ljcdscaling{ist}{r})
104 mu_km = ljcdscaling{ist}{r}(lidx);
109 % X_m(n) = (L(m,r) / mu_km) * X_m(n - e_r)
111 idx_prev = hashpop(n, N);
114 Xm{ist}(idx) = (L(ist, r) / mu_km) * Xm{ist}(idx_prev);
% G_0(n) = F_Z(n) (delay contribution)
% G_m(n) = (G_{m-1} * X_m)(n) for LJCD stations
% G_m(n) = G_{m-1}(n) + sum_r L(m,r) * G_m(n-e_r) for LI stations
% G_m(n) = G_{m-1}(n) + sum_r (L(m,r)/mu(m,1)) * G_m(n-e_r) for LLD
%          (with shifted mu, as in pfqn_ca)
134G_curr = zeros(stateSpaceSize, 1);
% Initialize G_0(n) = F_Z(n): delay server contribution
140 G_curr(idx) = Fz(Z, n);
% Convolve one station at a time
        % LJCD station: direct convolution sum
        % G_new(n) = sum_{i: 0<=i<=n} X_m(i) * G_old(n-i)
150 G_curr = zeros(stateSpaceSize, 1);
154 idx_n = hashpop(n, N);
157 % Inner loop: enumerate all i from 0 to n
160 idx_i = hashpop(i, N);
161 nmi = n - i; % n - i (component-wise)
162 idx_nmi = hashpop(nmi, N);
163 conv_sum = conv_sum + Xm{ist}(idx_i) * G_old(idx_nmi);
167 G_curr(idx_n) = conv_sum;
        % LI or LLD station: efficient recurrence
        % G_m(n) = G_{m-1}(n) + sum_r L(m,r) * G_m(n - e_r)
        % For LLD, replace L(m,r) with L(m,r)/mu(m,1) and shift mu
        % after each population step (as in pfqn_ca/pfqn_gld)
177 idx_n = hashpop(n, N);
178 % G_curr(idx_n) already has G_{m-1}(n) from previous iteration
182 idx_n1r = hashpop(n, N);
184 G_curr(idx_n) = G_curr(idx_n) + L(ist, r) * G_curr(idx_n1r);
192G = G_curr(end); % G(N)
% G(N) is at the last linear index (hashpop(N,N) = prod(N+1))
%% --- Local functions ---
function idx = hashpop(n, N)
% HASHPOP Map population vector to linear index (1-based)
% idx = 1 + n(1) + n(2)*(N(1)+1) + n(3)*(N(1)+1)*(N(2)+1) + ...
%
% Mixed-radix positional encoding with class 1 as the least significant
% digit, so hashpop(zeros(size(N)),N) == 1 and hashpop(N,N) == prod(N+1).
%
% Input:
% n - Population vector (1 x R), 0 <= n <= N componentwise
% N - Maximum population vector (1 x R)
%
% Output:
% idx - Linear index in 1..prod(N+1)
idx = 1;
for r = 1:length(n)
    % Positional weight of class r is prod_{s<r} (N(s)+1); for r == 1
    % the product over an empty range is 1
    idx = idx + prod(N(1:r-1) + 1) * n(r);
end
end
function [n] = pprod(n, N)
% PPROD Sequentially generate all vectors n: 0 <= n <= N
% n = pprod(N)    - initialize to zeros
% n = pprod(n, N) - advance to next vector, returns n(1)=-1 when done
%
% Vectors are produced in lexicographic order (leftmost component most
% significant), so every n - e_r is generated before n.
if nargin == 1
    % Initialization call: the first population vector is all zeros
    n = zeros(size(n));
    return
end
% Advance: zero out the maximal trailing run of saturated components,
% then increment the first non-saturated component found from the right
s = length(N);
while s > 0 && n(s) == N(s)
    n(s) = 0;
    s = s - 1;
end
if s == 0
    % All vectors 0 <= n <= N have been enumerated; signal termination
    n(1) = -1;
    return
end
n(s) = n(s) + 1;
end
function F = Fz(Z, n)
% FZ Delay server unnormalized probability factor
% F = (Z(1)^n(1) / n(1)!) * ... * (Z(R)^n(R) / n(R)!)
%
% Computed in log-space with gammaln for numerical stability.
%
% Input:
% Z - Think time vector (1 x R)
% n - Population vector (1 x R)
%
% Output:
% F - Delay factor F_Z(n)
f = 0;
for r = 1:length(n)
    if n(r) > 0
        % Log contribution of class r: n(r)*log(Z(r)) - log(n(r)!)
        % Classes with n(r) == 0 contribute a factor of 1; skipping them
        % avoids log(0)*0 = NaN when Z(r) == 0
        f = f + log(Z(r)) * n(r);
        f = f - gammaln(1 + n(r));
    end
end
F = exp(f);
end