function [V, t, names, stateSpace] = runRewardAnalyzer(self)
% RUNREWARDANALYZER Compute steady-state and transient rewards
%
% [V, T, NAMES, STATESPACE] = RUNREWARDANALYZER(SELF)
%
% Computes:
% - Steady-state expected rewards using the CTMC stationary distribution:
%     E[r] = sum_s pi(s) * r(s)
% - Transient value functions using uniformization with value iteration
%   (see the sketch below)
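%
% As an illustration only (a minimal sketch; the actual computation is
% inside solver_ctmc_reward, and Q, rewardFun, q, Tmax and nStates below
% are assumed, illustrative names):
%
%   pi = ctmc_solve(Q);             % stationary distribution: pi*Q = 0, sum(pi) = 1
%   r  = rewardFun(stateSpace);     % per-state reward vector r(s)
%   Er = pi(:).' * r(:);            % E[r] = sum_s pi(s) * r(s)
%
%   q  = max(abs(diag(Q)));         % uniformization rate, q >= max_s |Q(s,s)|
%   P  = eye(length(Q)) + Q / q;    % uniformized DTMC transition matrix
%   Vr = zeros(Tmax+1, nStates);    % Vr(k+1,s): expected reward after k steps from s
%   for k = 1:Tmax
%       Vr(k+1,:) = r(:).'/q + Vr(k,:) * P.';   % one value-iteration step
%   end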
% Results are cached in self.result.Reward for subsequent calls.
%
% Outputs:
%   V          - Cell array of value functions {nRewards x 1};
%                each V{r} is a [Tmax+1 x nStates] matrix
%   t          - Time vector [1 x Tmax+1]
%   names      - Cell array of reward names {nRewards x 1}
%   stateSpace - State space matrix [nStates x nDims]
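%
% Example (hypothetical model and reward name, for illustration):
%   model.setReward('energy', @(state) sum(state)); % per-state reward r(s)
%   [V, t, names] = solver.runRewardAnalyzer();
%   plot(t, V{1});  % transient value of the first reward, one curve per state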
% Copyright (c) 2012-2026, Imperial College London

% Get network structure with rewards
sn = self.model.getStruct(true);
% Validate that rewards are defined
% (the field name sn.reward below is an assumption about the struct layout)
if ~isfield(sn, 'reward') || isempty(sn.reward)
    error('No rewards defined. Use model.setReward(name, @(state) ...) before calling reward analysis.');
end
% Run the reward solver
Tstart = tic; % time the call; the runtime statistic is cached below
[steadyState, names, stateSpace, pi, V, t] = solver_ctmc_reward(sn, self.options);
runtime = toc(Tstart);
% Cache results for subsequent calls
self.result.Reward.V = V;
self.result.Reward.t = t;
self.result.Reward.names = names;
self.result.Reward.stateSpace = stateSpace;
self.result.Reward.steadyState = steadyState;
self.result.Reward.pi = pi;
self.result.Reward.runtime = runtime;
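
% A later caller can reuse the cached results without re-solving, e.g.
% (illustrative):
%   R  = self.result.Reward;  % fields: V, t, names, stateSpace, steadyState, pi, runtime
%   Er = R.steadyState;       % steady-state expected rewards, one per reward name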