function [R, names] = getAvgReward(self)
% GETAVGREWARD Get steady-state expected reward for all defined rewards
%
% [R, NAMES] = GETAVGREWARD(SELF)
%
% Outputs:
% R     - Vector of steady-state expected rewards [nRewards x 1]
% names - Cell array of reward names {nRewards x 1}
%
% The steady-state expected reward is computed as the average reward rate
% from the value iteration, which converges to E[r] = pi * r, where pi is
% the steady-state distribution.
%
% Example:
% model.setReward('qlen', @(state, sn) state(2));
% model.setReward('util', @(state, sn) min(state(2), 1));
% solver = SolverCTMC(model);
% [R, names] = solver.getAvgReward();
% fprintf('Average queue length: %f\n', R(1));
%
% Copyright (c) 2012-2026, Imperial College London

% Check if results are already cached
if isfield(self.result, 'Reward') && ~isempty(self.result.Reward) && ...
        isfield(self.result.Reward, 'steadyState') && ~isempty(self.result.Reward.steadyState)
    R = self.result.Reward.steadyState;
    names = self.result.Reward.names;
else
    % Results not cached: run the reward analyzer to populate them
    self.runRewardAnalyzer();
    R = self.result.Reward.steadyState;
    names = self.result.Reward.names;
end
end
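
% Illustrative sketch (hypothetical helper, not part of the LINE/SolverCTMC
% API): it shows one direct way to obtain E[r] = pi * r when a CTMC generator
% matrix Q and a per-state reward column vector r are already available, by
% solving the balance equations pi*Q = 0 together with the normalization
% sum(pi) = 1. The names Q, r and ctmcAvgRewardSketch are assumptions for
% illustration only; the solver above instead relies on runRewardAnalyzer.
function Ravg = ctmcAvgRewardSketch(Q, r)
% CTMCAVGREWARDSKETCH Steady-state expected reward pi * r for generator Q
nStates = size(Q, 1);
% Solve pi * [Q, 1] = [0 ... 0, 1]: balance equations plus normalization
piVec = [zeros(1, nStates), 1] / [Q, ones(nStates, 1)];
Ravg = piVec * r(:);  % E[r] = pi * r
end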