function [W_bar, W_bar_n] = map_m1ps_cdfrespt(C, D, mu, x, varargin)
% MAP_M1PS_CDFRESPT Compute sojourn time distribution in MAP/M/1-PS queue
%
% W_bar = MAP_M1PS_CDFRESPT(C, D, mu, x) computes the complementary
% distribution function of the sojourn time in a MAP/M/1 processor-sharing
% queue at the points specified in x.
%
% [W_bar, W_bar_n] = MAP_M1PS_CDFRESPT(C, D, mu, x) also returns the
% conditional complementary distributions W_bar_n{i} for customers finding
% i-1 customers in the system on arrival.
%
% [...] = MAP_M1PS_CDFRESPT(C, D, mu, x, 'Param', Value) specifies optional
% parameter name/value pairs:
%   'Epsilon'      - Truncation parameter for queue length (default: 1e-11)
%   'EpsilonPrime' - Truncation parameter for uniformization (default: 1e-10)
%   'Verbose'      - Display computation progress (default: false)
%
% Inputs:
%   C  - M x M matrix governing MAP transitions without arrivals
%   D  - M x M matrix governing MAP transitions with arrivals
%   mu - Service rate (scalar, mu > 0)
%   x  - Vector of time points at which to evaluate W_bar(x) = Pr[W > x]
%
% Outputs:
%   W_bar   - Row vector of same length as x, containing Pr[W > x]
%   W_bar_n - Cell array of conditional distributions (optional)
%
% The processor-sharing (PS) discipline shares the server equally among
% all customers. When n customers are present, each receives service at
% rate mu/n.
%
% The algorithm implements Theorem 1 from:
%   Masuyama, H., & Takine, T. (2003). Sojourn time distribution in a
%   MAP/M/1 processor-sharing queue. Operations Research Letters, 31(6),
%   406-412.
%
% Example:
%   % M/M/1-PS queue with lambda=0.8, mu=1
%   lambda = 0.8; mu = 1;
%   C = -lambda; D = lambda; % Poisson arrivals
%   x = linspace(0, 10, 100);
%   W_bar = map_m1ps_cdfrespt(C, D, mu, x);
%   plot(x, W_bar);
%   xlabel('x'); ylabel('Pr[W > x]');
%   title('Sojourn time distribution for M/M/1-PS');
%
% Copyright (c) 2012-2025, Imperial College London

%% Parse input arguments
p = inputParser;
addRequired(p, 'C', @(x) ismatrix(x) && size(x,1) == size(x,2));
addRequired(p, 'D', @(x) ismatrix(x) && size(x,1) == size(x,2));
addRequired(p, 'mu', @(x) isscalar(x) && x > 0);
addRequired(p, 'x', @(x) isnumeric(x) && all(x >= 0));
addParameter(p, 'Epsilon', 1e-11, @(x) isscalar(x) && x > 0 && x < 1);
addParameter(p, 'EpsilonPrime', 1e-10, @(x) isscalar(x) && x > 0 && x < 1);
addParameter(p, 'Verbose', false, @islogical);
parse(p, C, D, mu, x, varargin{:});
epsilon = p.Results.Epsilon;
epsilon_prime = p.Results.EpsilonPrime;
verbose = p.Results.Verbose;

%% Validate inputs
M = size(C, 1);
if size(D, 1) ~= M || size(D, 2) ~= M
    error('MAP_M1PS_CDFRESPT:DimensionMismatch', ...
        'C and D must have the same size');
end

%% Compute MAP parameters
% Stationary probability vector pi of the underlying Markov chain
% Solve: pi*(C + D) = 0, pi*e = 1
Q = C + D;
% Replace last row of Q with normalization constraint
A = Q';
A(end, :) = ones(1, M);
b = zeros(M, 1);
b(end) = 1;
pi_vec = (A \ b)';
e = ones(M, 1);
lambda = pi_vec * D * e;  % mean arrival rate of the MAP
rho = lambda / mu;

if verbose
    fprintf('MAP/M/1-PS Sojourn Time Computation\n');
    fprintf('  M (states): %d\n', M);
    fprintf('  lambda (arrival rate): %.4f\n', lambda);
    fprintf('  mu (service rate): %.4f\n', mu);
    fprintf('  rho (utilization): %.4f\n', lambda/mu);
end

% Check stability condition
if rho >= 1
    error('MAP_M1PS_CDFRESPT:Unstable', ...
        'System is unstable (rho = %.4f >= 1)', rho);
end

if verbose
    fprintf('Computing R matrix...\n');
end
R = compute_R_matrix(C, D, mu);
if verbose && isscalar(R)
    % NOTE(review): scalar-only print; matrix display not visible in source
    fprintf('  R = %.6f\n', R);
end

%% Determine truncation point N(epsilon) for queue length
% Find minimum N such that: (1/lambda) * sum_{n=0}^N pi_0 * R^n * D * e > 1 - epsilon
% where pi_0 = pi * (I - R)
I = eye(M);
pi_0 = pi_vec * (I - R);

% Compute spectral radius of R to estimate required N
rho_R = max(abs(eig(R)));
if rho_R >= 1
    error('MAP_M1PS_CDFRESPT:UnstableR', 'R matrix has spectral radius >= 1');
end

% Estimate N based on spectral radius:
% We need rho_R^N < epsilon * (1 - rho_R)
% N > log(epsilon * (1 - rho_R)) / log(rho_R)
if rho_R > 0
    N_estimate = ceil(log(epsilon * (1 - rho_R)) / log(rho_R));
else
    N_estimate = 1;  % degenerate case: R nilpotent/zero
end

% Cap at reasonable maximum to avoid excessive computation
% NOTE(review): original N_max value not visible in source -- confirm
N_max = 1000;
N_epsilon = min(max(N_estimate, 10), N_max);

if verbose
    fprintf('  Spectral radius of R: %.6f\n', rho_R);
    fprintf('  Estimated N: %d, using N(epsilon): %d (capped at %d)\n', N_estimate, N_epsilon, N_max);
end

%% Compute uniformization parameter theta
theta = max(abs(diag(C)));
theta_plus_mu = theta + mu;

x = x(:)'; % Ensure row vector
num_points = length(x);
W_bar = zeros(1, num_points);

% Only build the conditional distributions when the caller asked for them
compute_conditional = (nargout > 1);
if compute_conditional
    W_bar_n = cell(N_epsilon + 1, 1);
    for n = 0:N_epsilon
        W_bar_n{n+1} = zeros(1, num_points);
    end
end

%% Determine global K_max across all x points
K_max_global = 0;
L_per_point = zeros(1, num_points);
K_per_point = zeros(1, num_points);
for idx = 1:num_points
    x_val = x(idx);
    mean_val = theta_plus_mu * x_val;
    % Poisson mass window: mean +/- 10 standard deviations, then widen
    % until at least 1 - epsilon_prime of the mass is covered
    L_per_point(idx) = max(0, floor(mean_val - 10*sqrt(mean_val)));
    R_upper = ceil(mean_val + 10*sqrt(mean_val));
    pmf = poisspdf(L_per_point(idx):R_upper, mean_val);
    cumsum_pmf = sum(pmf);
    while cumsum_pmf < 1 - epsilon_prime && R_upper < 10000
        R_upper = R_upper + 10;
        pmf = poisspdf(L_per_point(idx):R_upper, mean_val);
        cumsum_pmf = sum(pmf);
    end
    K_per_point(idx) = R_upper;
    K_max_global = max(K_max_global, K_per_point(idx));
end

if verbose
    fprintf('  K_max (global uniformization truncation): %d\n', K_max_global);
    fprintf('Computing h_{n,k} recursion...\n');
end

%% Precompute h_{n,k} using 3D matrix (M x (N+1) x (K+1)) for fast indexing
h = compute_h_matrix(C, D, mu, M, N_epsilon, K_max_global, theta);

%% Precompute pi_0 * R^n * D weights with early truncation
% Use iterative R^n multiplication and stop when weight is negligible
weight_tol = epsilon * 1e-2;
weights = zeros(N_epsilon + 1, M); % weights(n+1,:) = pi_0 * R^n * D (row vector)
R_power = I; % R^0 = I
N_actual = N_epsilon;
for n = 0:N_epsilon
    w = pi_0 * R_power * D;
    % Geometric decay of R^n makes the tail negligible past some n
    if n > 0 && norm(w, inf) < weight_tol
        N_actual = n - 1;
        break;
    end
    weights(n+1, :) = w;
    R_power = R_power * R;
end

if verbose
    fprintf('  N_actual (early truncation): %d / %d\n', N_actual, N_epsilon);
end

%% Compute sojourn time distribution for each x
for idx = 1:num_points
    x_val = x(idx);
    L = L_per_point(idx);
    K_max = K_per_point(idx);

    % Precompute Poisson PMF values for this x point
    poisson_vals = poisspdf(L:K_max, theta_plus_mu * x_val); % vector

    for n = 0:N_actual
        weight = weights(n+1, :); % 1 x M row vector

        % Vectorized sum over k: sum_k(m) = sum_j poisson(j) * h(m, n+1, L+j+1)
        h_slice = squeeze(h(:, n+1, L+1:K_max+1)); % M x (K_max-L+1)
        if M == 1
            h_slice = h_slice(:)'; % Ensure row vector for M=1 (squeeze transposes)
        end
        sum_k = h_slice * poisson_vals(:); % M x 1

        term_n = (1/lambda) * weight * sum_k;
        W_bar(idx) = W_bar(idx) + term_n;

        % Store conditional distribution if requested
        if compute_conditional
            W_bar_n{n+1}(idx) = sum(sum_k) / M;
        end
    end
end

if verbose
    fprintf('Computation complete.\n');
end

end
244%% ========================================================================
245% LOCAL HELPER FUNCTIONS
246% ========================================================================
function R = compute_R_matrix(C, D, mu)
% COMPUTE_R_MATRIX Compute rate matrix R for the MAP/M/1 queue
%
% Solves the quadratic matrix equation:
%   D + R*(C - mu*I) + mu*R^2 = 0
% returning the minimal nonnegative solution.
%
% Inputs:
%   C, D - MAP generator components (M x M)
%   mu   - service rate (scalar > 0)
% Output:
%   R    - rate matrix (M x M), spectral radius < 1 for a stable queue

M = size(C, 1);
I = eye(M);

if M == 1
    % For scalar case (M=1), use quadratic formula
    % mu*R^2 + (C - mu)*R + D = 0
    a = mu;
    b = C - mu;
    c = D;
    discriminant = b^2 - 4*a*c;
    if discriminant < 0
        error('compute_R_matrix:NoRealSolution', 'No real solution for R');
    end
    R1 = (-b - sqrt(discriminant)) / (2*a);
    R2 = (-b + sqrt(discriminant)) / (2*a);
    % Choose minimal nonnegative solution (the root inside [0,1))
    if R1 >= 0 && R1 < 1
        R = R1;
    elseif R2 >= 0 && R2 < 1
        R = R2;
    else
        error('compute_R_matrix:NoValidSolution', 'No valid solution in [0,1) for R');
    end
else
    % For matrix case, use fixed-point iteration
    % From D + R*(C - mu*I) + mu*R^2 = 0, we can derive:
    %   R*(mu*I - C) = D + mu*R^2
    %   R = (D + mu*R^2) * inv(mu*I - C)
    % which gives the fixed-point iteration: R_new = (D + mu*R_old^2) / (mu*I - C)

    % Initial approximation: start from zero (iterates increase monotonically)
    R = zeros(M);
    % NOTE(review): max_iter/tol values not visible in source -- confirm
    max_iter = 10000;
    tol = 1e-14;

    % Precompute (mu*I - C) for efficiency
    muI_minus_C = mu*I - C;

    for iter = 1:max_iter
        R_old = R;
        % Fixed-point iteration
        R = (D + mu*(R*R)) / muI_minus_C;
        if norm(R - R_old, inf) < tol
            break;
        end
    end
    if iter == max_iter
        warning('compute_R_matrix:SlowConvergence', ...
            'R computation did not converge within %d iterations', max_iter);
    end
end

% Ensure non-negativity (numerical errors might produce small negative values)
R = max(R, 0);

% Residual sanity check on the defining equation
check = D + R*(C - mu*I) + mu*(R*R);
if norm(check, inf) > 1e-4
    warning('compute_R_matrix:LargeResidual', ...
        'Large residual in R computation: ||residual|| = %e', norm(check, inf));
end

end
function h = compute_h_matrix(C, D, mu, M, N, K, theta)
% COMPUTE_H_MATRIX Compute h_{n,k} coefficients as a 3D matrix h(M, N+1, K+1)
%
% The h_{n,k} vectors satisfy the recursion (Theorem 1):
%   h_{n,0}   = e (vector of ones), for n = 0, 1, ...
%   h_{n,k+1} = 1/(theta+mu) * [ n*mu/(n+1) * h_{n-1,k}
%                                + (theta*I + C) * h_{n,k}
%                                + D * h_{n+1,k} ]
% where h_{-1,k} = 0 for all k, and h_{N+1,k} is truncated to 0 at the
% queue-length boundary n = N.
%
% Inputs:
%   C, D  - MAP generator components (M x M)
%   mu    - service rate
%   M     - number of MAP phases
%   N     - queue-length truncation level
%   K     - uniformization truncation level
%   theta - uniformization rate, theta >= max |diag(C)|
% Output:
%   h     - M x (N+1) x (K+1) array, h(:, n+1, k+1) = h_{n,k}

theta_plus_mu = theta + mu;
theta_I_plus_C = theta * eye(M) + C;
inv_tpm = 1 / theta_plus_mu;

% Store h as M x (N+1) x (K+1) matrix
h = zeros(M, N+1, K+1);

% Base case: h_{n,0} = e for all n
h(:, :, 1) = 1;

% Precompute n*mu/(n+1) coefficients
nmu_coeff = zeros(N+1, 1);
for n = 0:N
    nmu_coeff(n+1) = n * mu / (n + 1);
end

% Recursive computation: fill column by column (increasing k)
for k = 0:K-1
    % Vectorized over n: extract slice k for all n values
    h_col_k = h(:, :, k+1); % M x (N+1)

    % Compute (theta*I + C) * h_{n,k} for all n at once
    term2_all = theta_I_plus_C * h_col_k; % M x (N+1)

    % Compute D * h_{n+1,k} for n=0..N-1
    term3_all = D * h_col_k(:, 2:end); % M x N (for n=0..N-1)

    for n = 0:N
        term2 = term2_all(:, n+1);

        % First term: n*mu/(n+1) * h_{n-1,k}; h_{-1,k} = 0
        if n > 0
            term1 = nmu_coeff(n+1) * h_col_k(:, n);
        else
            term1 = zeros(M, 1);
        end

        % Third term: D * h_{n+1,k}; truncated to 0 at n = N
        if n < N
            term3 = term3_all(:, n+1);
        else
            term3 = zeros(M, 1);
        end

        h(:, n+1, k+2) = inv_tpm * (term1 + term2 + term3);
    end
end

end