function [Rt, t, names] = getTranReward(self, rewardName)
% GETTRANREWARD Get transient expected reward metrics over time
%
% [RT, T, NAMES] = GETTRANREWARD(SELF) returns transient expected rewards
% for all defined rewards.
%
% [RT, T, NAMES] = GETTRANREWARD(SELF, REWARDNAME) returns the transient
% expected reward for a specific named reward.
%
% This method computes the transient expected reward E[r(X(t))] at each
% time point t, where X(t) is the system state at time t and r is the
% reward function. The computation uses the transient probability
% distribution pi_t(s) obtained from CTMC uniformization:
%
%   E[r(X(t))] = sum_s pi_t(s) * r(s)
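%
% Stacking the per-state rewards into a vector R with entries r(s), the
% whole trajectory is obtained as the matrix-vector product pit * R, where
% pit is the [nTimePoints x nStates] matrix of transient probabilities;
% this is the vectorized form used in the implementation below.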
%
% Output:
% Rt    - Cell array {nRewards x 1} where each Rt{r} is a struct with:
%           .t      - Time vector [nTimePoints x 1]
%           .metric - Expected reward at each time [nTimePoints x 1]
%           .name   - Reward name string
%         Or a single struct if rewardName is specified
% t     - Time vector [nTimePoints x 1]
% names - Cell array of reward names, or a single string if rewardName is
%         specified
%
% Example:
% model = Network('example');
% % ... model setup ...
% model.setReward('QLen', @(state) state.at(queue, class1));
% solver = SolverCTMC(model, 'timespan', [0, 10]);
% [Rt, t, names] = solver.getTranReward();
% plot(t, Rt{1}.metric);
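%
% A single named reward can also be requested, returning a struct rather
% than a cell array:
% [Rt, t, name] = solver.getTranReward('QLen');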
%
% Copyright (c) 2012-2026, Imperial College London

if nargin < 2 % default: return all defined rewards
    rewardName = [];
end

options = self.getOptions;

if ~isfield(options, 'timespan') || ~isfinite(options.timespan(2))
    line_error(mfilename, 'getTranReward requires a finite timespan. Use SolverCTMC(model, ''timespan'', [0, T]).');
end

sn = self.model.getStruct(true);

% Validate that rewards are defined
if ~isfield(sn, 'reward') || isempty(sn.reward)
    line_error(mfilename, 'No rewards defined. Use model.setReward(name, @(state) ...) before calling getTranReward.');
end

nRewards = length(sn.reward);

% Build the CTMC infinitesimal generator and state space
[InfGen, StateSpace, StateSpaceAggr, ~, ~, ~, sn] = solver_ctmc(sn, options);
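% StateSpace enumerates the detailed stateful-node states underlying InfGen,
% while StateSpaceAggr holds the aggregated job-count view whose rows are
% passed to the reward functions below.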

% Compute initial state probability
state = [];
for ist = 1:sn.nnodes
    if sn.isstateful(ist)
        isf = sn.nodeToStateful(ist);
        state = [state, zeros(1, size(sn.space{isf}, 2) - length(sn.state{isf})), sn.state{isf}];
    end
end
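% Note: each stateful node's state vector is left-padded with zeros to the
% full width of that node's state-space encoding, so the concatenated row
% aligns with the columns of StateSpace.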

nstates = size(InfGen, 1);
pi0 = zeros(1, nstates);

state0 = matchrow(StateSpace, state);
if state0 == -1
    line_error(mfilename, 'Initial state not contained in the state space.');
end
pi0(state0) = 1;

% Compute transient probabilities
[pit, t] = ctmc_transient(InfGen, pi0, options.timespan(1), options.timespan(2), options.stiff, [], options.timestep);
pit(pit < GlobalConstants.Zero) = 0; % clamp numerical noise below the zero tolerance

% Build index maps for RewardState
nodeToStationMap = containers.Map('KeyType', 'int32', 'ValueType', 'int32');
classToIndexMap = containers.Map('KeyType', 'int32', 'ValueType', 'int32');

for ind = 1:sn.nnodes
    if sn.isstation(ind)
        nodeToStationMap(int32(ind)) = sn.nodeToStation(ind);
    end
end

for r = 1:sn.nclasses
    classToIndexMap(int32(r)) = r;
end
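% These maps allow RewardState to translate node indices to station indices
% and resolve class indices without recomputing them for every state row.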

% Build reward vectors (one per reward definition)
R = zeros(nRewards, nstates);
names = cell(nRewards, 1);
for r = 1:nRewards
    names{r} = sn.reward{r}.name;
    rewardFn = sn.reward{r}.fn;

    for s = 1:nstates
        % Create RewardState for this state row
        stateRow = StateSpaceAggr(s, :);
        rewardState = RewardState(stateRow, sn, nodeToStationMap, classToIndexMap);

        % Try calling with RewardState (new API, single argument)
        try
            R(r, s) = rewardFn(rewardState);
        catch
            % If that fails, fall back to the legacy @(state, sn) signature
            R(r, s) = rewardFn(StateSpaceAggr(s, :), sn);
        end
    end
end
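% With R precomputed, each reward trajectory reduces to a single
% matrix-vector product against the transient probability matrix.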

% Compute transient expected rewards: E[r(X(t))] = pit * R'
Rt = cell(nRewards, 1);
for r = 1:nRewards
    reward_t = pit * R(r, :)';

    metricVal = struct();
    metricVal.t = t;
    metricVal.metric = reward_t;
    metricVal.name = names{r};
    Rt{r} = metricVal;
end

% Filter to specific reward if requested
if ~isempty(rewardName)
    rewardName = char(rewardName);
    idx = find(strcmp(names, rewardName));
    if isempty(idx)
        line_error(mfilename, 'Reward "%s" not found. Available rewards: %s', ...
            rewardName, strjoin(names, ', '));
    end
    Rt = Rt{idx};
    names = rewardName;
end
end