LINE Solver
MATLAB API documentation
Loading...
Searching...
No Matches
map_m1ps_cdfrespt.m
function [W_bar, W_bar_n] = map_m1ps_cdfrespt(C, D, mu, x, varargin)
% MAP_M1PS_CDFRESPT Compute sojourn time distribution in MAP/M/1-PS queue
%
% W_bar = MAP_M1PS_CDFRESPT(C, D, mu, x) computes the complementary
% distribution function of the sojourn time in a MAP/M/1 processor-sharing
% queue at the points specified in x.
%
% [W_bar, W_bar_n] = MAP_M1PS_CDFRESPT(C, D, mu, x) also returns the
% conditional complementary distributions W_bar_n{i} for customers finding
% i-1 customers in the system on arrival.
%
% [...] = MAP_M1PS_CDFRESPT(C, D, mu, x, 'Param', Value) specifies optional
% parameters:
%   'Epsilon'      - Truncation parameter for queue length (default: 1e-11)
%   'EpsilonPrime' - Truncation parameter for uniformization (default: 1e-10)
%   'Verbose'      - Display computation progress (default: false)
%
% Input:
%   C  - M x M matrix governing MAP transitions without arrivals
%   D  - M x M matrix governing MAP transitions with arrivals
%   mu - Service rate (scalar, mu > 0)
%   x  - Vector of time points at which to evaluate W_bar(x) = Pr[W > x]
%
% Output:
%   W_bar   - Vector of same size as x, containing Pr[W > x]
%   W_bar_n - Cell array of conditional distributions (optional)
%
% The processor-sharing (PS) discipline shares the server equally among
% all customers. When n customers are present, each receives service at
% rate 1/n.
%
% The algorithm implements Theorem 1 from:
%   Masuyama, H., & Takine, T. (2003). Sojourn time distribution in a
%   MAP/M/1 processor-sharing queue. Operations Research Letters, 31(6),
%   406-412.
%
% Example:
%   % M/M/1-PS queue with lambda=0.8, mu=1
%   lambda = 0.8; mu = 1;
%   C = -lambda; D = lambda; % Poisson arrivals
%   x = linspace(0, 10, 100);
%   W_bar = map_m1ps_cdfrespt(C, D, mu, x);
%   plot(x, W_bar);
%   xlabel('x'); ylabel('Pr[W > x]');
%   title('Sojourn time distribution for M/M/1-PS');

% Copyright (c) 2012-2025, Imperial College London
% All rights reserved.

%% Parse input arguments
% inputParser enforces: C, D square; mu positive scalar; x nonnegative numeric.
p = inputParser;
addRequired(p, 'C', @(x) ismatrix(x) && size(x,1) == size(x,2));
addRequired(p, 'D', @(x) ismatrix(x) && size(x,1) == size(x,2));
addRequired(p, 'mu', @(x) isscalar(x) && x > 0);
addRequired(p, 'x', @(x) isnumeric(x) && all(x >= 0));
addParameter(p, 'Epsilon', 1e-11, @(x) isscalar(x) && x > 0 && x < 1);
addParameter(p, 'EpsilonPrime', 1e-10, @(x) isscalar(x) && x > 0 && x < 1);
addParameter(p, 'Verbose', false, @islogical);

parse(p, C, D, mu, x, varargin{:});
epsilon = p.Results.Epsilon;
epsilon_prime = p.Results.EpsilonPrime;
verbose = p.Results.Verbose;

%% Validate inputs
% C and D must have matching dimensions (the parser only checks squareness).
M = size(C, 1);
if size(D, 1) ~= M || size(D, 2) ~= M
    error('MAP_M1PS_CDFRESPT:DimensionMismatch', 'C and D must have the same size');
end

I = eye(M);
e = ones(M, 1);

%% Compute MAP parameters
% Stationary probability vector pi of the underlying Markov chain
% Solve: pi*(C + D) = 0, pi*e = 1
Q = C + D;
% Replace the last equation of the (singular) transposed generator system
% with the normalization constraint pi*e = 1 to obtain a unique solution.
A = Q';
A(end,:) = e';
b = zeros(M, 1);
b(end) = 1;
% NOTE: this local variable shadows the MATLAB builtin constant pi for the
% remainder of this function scope.
pi = (A \ b)';

% Mean arrival rate: lambda = pi * D * e
lambda = pi * D * e;

if verbose
    fprintf('MAP/M/1-PS Sojourn Time Computation\n');
    fprintf(' M (states): %d\n', M);
    fprintf(' lambda (arrival rate): %.4f\n', lambda);
    fprintf(' mu (service rate): %.4f\n', mu);
    fprintf(' rho (utilization): %.4f\n', lambda/mu);
end

% Check stability condition: the queue is positive recurrent only if rho < 1.
rho = lambda / mu;
if rho >= 1
    error('MAP_M1PS_CDFRESPT:Unstable', ...
        'System is unstable (rho = %.4f >= 1)', rho);
end

%% Compute R matrix
% Minimal nonnegative solution of D + R*(C - mu*I) + mu*R^2 = 0 (QBD rate matrix).
if verbose
    fprintf('Computing R matrix...\n');
end
R = compute_R_matrix(C, D, mu);

if verbose
    % NOTE(review): this %.6f format only renders correctly for M = 1
    % (scalar R); for M > 1 it prints R column-wise — confirm intent.
    fprintf(' R = %.6f\n', R);
end

%% Determine truncation point N(epsilon) for queue length
% Find minimum N such that: (1/lambda) * sum_{n=0}^N pi_0 * R^n * D * e > 1 - epsilon
% where pi_0 = pi * (I - R)
pi_0 = pi * (I - R);

% Spectral radius of R governs the geometric decay of the queue-length tail,
% so it yields an a-priori estimate of the required truncation level.
rho_R = max(abs(eig(R)));

if rho_R >= 1
    error('MAP_M1PS_CDFRESPT:UnstableR', 'R matrix has spectral radius >= 1');
end

% Estimate N based on spectral radius:
% We need rho_R^N < epsilon * (1 - rho_R)
% N > log(epsilon * (1 - rho_R)) / log(rho_R)
if rho_R > 0
    N_estimate = ceil(log(epsilon * (1 - rho_R)) / log(rho_R));
else
    % rho_R == 0 (e.g. no queueing possible); a single level suffices.
    N_estimate = 1;
end

% Cap at reasonable maximum to avoid excessive computation; also enforce a
% floor of 10 levels so very small estimates still give a sane truncation.
N_max = 10000;
N_epsilon = min(max(N_estimate, 10), N_max);

if verbose
    fprintf(' Spectral radius of R: %.6f\n', rho_R);
    fprintf(' Estimated N: %d, using N(epsilon): %d (capped at %d)\n', N_estimate, N_epsilon, N_max);
end

%% Compute uniformization parameter theta
% theta must dominate the total exit rates of C; theta + mu is the
% uniformization rate of the combined (phase + service) process.
theta = max(abs(diag(C)));
theta_plus_mu = theta + mu;

%% Initialize output
x = x(:)'; % Ensure row vector
num_points = length(x);
W_bar = zeros(1, num_points);

% Conditional distributions are only allocated when the caller asks for them.
if nargout > 1
    W_bar_n = cell(N_epsilon + 1, 1);
    for n = 0:N_epsilon
        W_bar_n{n+1} = zeros(1, num_points);
    end
end

%% Determine global K_max across all x points
% For each time point, the Poisson(theta_plus_mu * x) weights are truncated
% to a window [L, K] containing at least mass 1 - epsilon_prime. The window
% starts 10 standard deviations around the mean and K grows if needed.
K_max_global = 0;
L_per_point = zeros(1, num_points);
K_per_point = zeros(1, num_points);
for idx = 1:num_points
    x_val = x(idx);
    mean_val = theta_plus_mu * x_val;
    if mean_val > 0
        L_per_point(idx) = max(0, floor(mean_val - 10*sqrt(mean_val)));
        R_upper = ceil(mean_val + 10*sqrt(mean_val));
        pmf = poisspdf(L_per_point(idx):R_upper, mean_val);
        cumsum_pmf = sum(pmf);
        % Extend the upper limit until enough Poisson mass is captured
        % (hard-capped at 10000 terms to bound the work).
        while cumsum_pmf < 1 - epsilon_prime && R_upper < 10000
            R_upper = R_upper + 10;
            pmf = poisspdf(L_per_point(idx):R_upper, mean_val);
            cumsum_pmf = sum(pmf);
        end
        K_per_point(idx) = R_upper;
    end
    % x_val == 0 leaves L = K = 0, where poisspdf(0, 0) = 1.
    K_max_global = max(K_max_global, K_per_point(idx));
end

if verbose
    fprintf(' K_max (global uniformization truncation): %d\n', K_max_global);
    fprintf('Computing h_{n,k} recursion...\n');
end

%% Precompute h_{n,k} using 3D matrix (M x (N+1) x (K+1)) for fast indexing
h = compute_h_matrix(C, D, mu, M, N_epsilon, K_max_global, theta);

%% Precompute pi_0 * R^n * D weights with early truncation
% Use iterative R^n multiplication and stop when weight is negligible.
% weights(n+1,:) is the (unnormalized) probability-flow vector of an arrival
% that finds n customers in the system.
weight_tol = epsilon * 1e-2;
weights = zeros(N_epsilon + 1, M); % weights(n+1,:) = pi_0 * R^n * D (row vector)
R_power = I; % R^0 = I
N_actual = N_epsilon;
for n = 0:N_epsilon
    w = pi_0 * R_power * D;
    weights(n+1, :) = w;
    if n > 0 && norm(w, inf) < weight_tol
        N_actual = n;
        break;
    end
    R_power = R_power * R;
end

if verbose
    fprintf(' N_actual (early truncation): %d / %d\n', N_actual, N_epsilon);
end

%% Compute sojourn time distribution for each x
% W_bar(x) = (1/lambda) * sum_n (pi_0 R^n D) * sum_k P[Pois(theta_plus_mu*x)=k] h_{n,k}
for idx = 1:num_points
    x_val = x(idx);
    L = L_per_point(idx);
    K_max = K_per_point(idx);

    % Precompute Poisson PMF values for this x point
    poisson_vals = poisspdf(L:K_max, theta_plus_mu * x_val); % vector

    for n = 0:N_actual
        weight = weights(n+1, :); % 1 x M row vector

        % Vectorized sum over k: sum_k(m) = sum_j poisson(j) * h(m, n+1, L+j+1)
        h_slice = squeeze(h(:, n+1, L+1:K_max+1)); % M x (K_max-L+1)
        if M == 1
            % squeeze collapses the 1 x 1 x K slice to a column; restore row shape
            h_slice = h_slice(:)'; % Ensure row vector for M=1
        end
        sum_k = h_slice * poisson_vals(:); % M x 1

        term_n = (1/lambda) * weight * sum_k;
        W_bar(idx) = W_bar(idx) + term_n;

        % Store conditional distribution if requested
        if nargout > 1
            % NOTE(review): this is an unweighted average over the M phases;
            % presumably the conditional tail should weight sum_k by the
            % arrival-phase distribution (pi_0*R^n*D normalized by its row
            % sum) — confirm against Theorem 1 of Masuyama & Takine (2003).
            W_bar_n{n+1}(idx) = sum(sum_k) / M;
        end
    end
end

if verbose
    fprintf('Computation complete.\n');
end

end
243
244%% ========================================================================
245% LOCAL HELPER FUNCTIONS
246% ========================================================================
247
function R = compute_R_matrix(C, D, mu)
% COMPUTE_R_MATRIX Compute the QBD rate matrix R for a MAP/M/1 queue.
%
% R is the minimal nonnegative solution of the quadratic matrix equation
%   D + R*(C - mu*I) + mu*R^2 = 0
%
% Input:
%   C  - M x M matrix governing MAP transitions without arrivals
%   D  - M x M matrix governing MAP transitions with arrivals
%   mu - Service rate (scalar, mu > 0)
%
% Output:
%   R  - M x M rate matrix (scalar when M = 1)

M = size(C, 1);
I = eye(M);

% For scalar case (M=1), use quadratic formula
if M == 1
    % mu*R^2 + (C - mu)*R + D = 0
    a = mu;
    b = C - mu;
    c = D;
    discriminant = b^2 - 4*a*c;

    if discriminant < 0
        error('compute_R_matrix:NoRealSolution', 'No real solution for R');
    end

    % Two candidate roots; R1 <= R2 since a = mu > 0
    R1 = (-b - sqrt(discriminant)) / (2*a);
    R2 = (-b + sqrt(discriminant)) / (2*a);

    % Choose minimal nonnegative solution in [0, 1)
    if R1 >= 0 && R1 < 1
        R = R1;
    elseif R2 >= 0 && R2 < 1
        R = R2;
    else
        error('compute_R_matrix:NoValidSolution', 'No valid solution in [0,1) for R');
    end
else
    % For matrix case, use fixed-point iteration.
    % From D + R*(C - mu*I) + mu*R^2 = 0, we can derive:
    %   R*(mu*I - C) = D + mu*R^2
    %   R = (D + mu*R^2) * inv(mu*I - C)
    % which gives the iteration: R_new = (D + mu*R_old^2) / (mu*I - C)

    % Initial approximation (the R^2 term dropped)
    R = -D / (C - mu*I);
    max_iter = 5000;
    tol = 1e-10;

    % Precompute (mu*I - C) for efficiency (loop invariant)
    muI_minus_C = mu*I - C;

    % BUGFIX: previously the convergence warning fired whenever
    % iter == max_iter, even when the final iteration DID converge.
    % Track convergence with an explicit flag instead of the loop counter.
    converged = false;
    for iter = 1:max_iter
        R_old = R;
        % Fixed-point iteration
        R = (D + mu*(R*R)) / muI_minus_C;

        % Check convergence in the infinity norm
        if norm(R - R_old, inf) < tol
            converged = true;
            break;
        end
    end

    if ~converged
        warning('compute_R_matrix:SlowConvergence', ...
            'R computation did not converge within %d iterations', max_iter);
    end
end

% Ensure non-negativity (numerical errors might produce small negative values)
R(R < 0) = 0;

% Verify solution: residual of the defining quadratic equation
check = D + R*(C - mu*I) + mu*(R*R);
if norm(check, inf) > 1e-4
    warning('compute_R_matrix:LargeResidual', ...
        'Large residual in R computation: ||residual|| = %e', norm(check, inf));
end

end
322
function h = compute_h_matrix(C, D, mu, M, N, K, theta)
% COMPUTE_H_MATRIX Build the h_{n,k} coefficient vectors as an M x (N+1) x (K+1) array.
%
% The vectors obey the recursion of Theorem 1:
%   h_{n,0}   = e  (all-ones vector),  for n = 0, 1, ..., N
%   h_{n,k+1} = [ n*mu/(n+1) * h_{n-1,k} + (theta*I + C) * h_{n,k}
%                 + D * h_{n+1,k} ] / (theta + mu)
% with the conventions h_{-1,k} = 0 and (by truncation) h_{N+1,k} = 0.
%
% Input:
%   C, D  - M x M MAP matrices (transitions without / with arrivals)
%   mu    - service rate
%   M     - number of MAP phases
%   N     - queue-length truncation level
%   K     - uniformization truncation level
%   theta - uniformization rate
%
% Output:
%   h - 3D array with h(:, n+1, k+1) holding the vector h_{n,k}

scale = 1 / (theta + mu);          % common 1/(theta+mu) factor
A = theta * eye(M) + C;            % (theta*I + C), invariant across the recursion

h = zeros(M, N+1, K+1);
h(:, :, 1) = ones(M, N+1);         % base case: h_{n,0} = e for every n

% Row of coefficients n*mu/(n+1) for n = 1..N (empty when N = 0)
growth = (1:N) * mu ./ (2:(N+1));

% March forward in k: slice k+2 is obtained entirely from slice k+1.
% All three recursion terms are formed for every n at once via column
% shifts with zero padding at the boundaries.
for k = 0:K-1
    cur = h(:, :, k+1);            % M x (N+1); column n+1 holds h_{n,k}

    % Term 1: n*mu/(n+1) * h_{n-1,k}; zero column for n = 0 (h_{-1,k} = 0).
    down = [zeros(M, 1), cur(:, 1:N) .* repmat(growth, M, 1)];

    % Term 2: (theta*I + C) * h_{n,k}, applied to all columns simultaneously.
    stay = A * cur;

    % Term 3: D * h_{n+1,k}; zero column for n = N (truncation boundary).
    up = [D * cur(:, 2:end), zeros(M, 1)];

    h(:, :, k+2) = scale * (down + stay + up);
end

end