% Garbi, G. et al. (2020). Learning Queueing Networks by Recurrent Neural Networks
4classdef QueueNetworkLearningRNNLayer < nnet.layer.Layer % ...
 % & nnet.layer.Formattable ... % (Optional)
 % & nnet.layer.Acceleratable % (Optional)
16 properties (Learnable)
17 % Layer learnable parameters.
27 function layer = QueueNetworkLearningRNNLayer(M,R,concurrency)
28 % Create a QueueNetworkLearningRNNLayer
% Constructor for the queueing-network RNN layer.
% M           - number of stations/queues (sizes mu as [M,1] and P as MxM).
% R           - NOTE(review): not used in the lines visible here; original
%               lines 29-31 are missing from this extract and presumably
%               store M and R on the layer (layer.M is read by resetState)
%               -- confirm against the full file.
% concurrency - per-queue service concurrency cap, stored on the layer and
%               used in predict() to bound the effective queue length.
32 layer.concurrency = concurrency;
% Service rates: non-negative uniform random init, one rate per station.
35 layer.mu = layer.initializeUniformNonNeg([M,1]);
% Routing probabilities: initialized with M-1 free entries per row.
37 P = layer.initializeUniformNonNeg([M,M-1]);
% Build a selector that scatters the M*(M-1) off-diagonal entries into an
% MxM matrix: indices k in 0..M^2-1 with mod(k, M+1) ~= 0 are exactly the
% off-diagonal positions, so the diagonal of layer.P stays zero.
% NOTE(review): this statement is split across two lines by extraction;
% the trailing ' is a transpose applied to the find(...) result.
39 oneHot = (find(mod((0:(M^2)-1), M+1)~=0))
'==1:M^2;
% Flatten P row-major, scatter through the one-hot selector, reshape to MxM.
40 layer.P = reshape(reshape(P.', 1, []) * oneHot, M, M);
% Initialize hidden state (timestamp + M queue lengths) to zeros.
42 layer = layer.resetState();
45 function parameter = initializeUniformNonNeg(layer, sz)
% Sample a learnable parameter of size sz uniformly from [a, b] as single
% precision, wrapped in a dlarray for automatic differentiation.
% NOTE(review): a and b are defined on original lines 46-47, which are
% missing from this extract -- presumably 0 <= a < b so the draw is
% non-negative, as the function name suggests; confirm in the full file.
% NOTE(review): this statement is split across two lines by extraction.
48 parameter = a + (b-a).*rand(sz,
'single');
49 parameter = dlarray(parameter);
52 function [Z,state] = predict(layer, X)
% Forward pass: unroll the queueing-network dynamics over the time
% dimension of X (4th dimension = time steps). Each step reads a timestamp
% from X, advances the hidden queue-length state by an explicit Euler-style
% update, and writes timestamp + predicted queue lengths into Z.
% NOTE(review): the loop over t and the assignment of the `state` output
% (original lines 56 and beyond 70) are missing from this extract;
% original line 56 is presumably "for t = 1:numTimeSteps" -- confirm.
53 numTimeSteps = size(X,4);
% Hidden state is reset at the start of every forward pass, so the layer
% is stateless across calls despite holding hiddenState internally.
54 layer = layer.resetState();
55 Z = dlarray(zeros([size(X,1), size(X,2), 1, numTimeSteps]));
% Current observation timestamp for this step.
57 currentT = X(1,1,1,t);
% hiddenState(1) holds the previous timestamp; 2:end hold queue lengths.
58 oldT = layer.hiddenState(1);
59 deltaT = currentT - oldT;
% Rate matrix: abs() keeps learned mu and P effectively non-negative;
% subtracting layer.I (presumably an identity-like matrix stored on the
% layer -- its definition is not visible here) gives the net flow
% (arrivals minus departures) per station.
% NOTE(review): statement split by extraction; the leading ' on the next
% line is a transpose applied to abs(layer.mu).
60 pm = abs(layer.mu)
'.*(abs(layer.P) - layer.I);
% Euler step: queue lengths advance by deltaT times throughput, where the
% effective served population is capped by the concurrency limit.
61 pred = layer.hiddenState(2:end) + (deltaT*min(layer.hiddenState(2:end), layer.concurrency)) * pm;
% New hidden state = [current timestamp, predicted queue lengths].
66 xh_pred = cat(2, [currentT], pred);
68 layer.hiddenState = xh_pred;
% Emit timestamp in channel 1 and queue-length predictions in channel 2.
69 Z(:,1,:,t) = currentT;
70 Z(:,2,:,t) = xh_pred(2:end);
75 function layer = resetState(layer)
% Zero the hidden state: entry 1 is the last-seen timestamp, entries
% 2..M+1 are the per-station queue lengths (layer.M is presumably set in
% the constructor on lines missing from this extract -- confirm).
% NOTE(review): statement split by extraction; the trailing ' on the next
% line transposes the zeros column vector into a row vector, matching the
% row-wise cat(2, ...) update in predict().
76 layer.hiddenState = zeros(layer.M+1,1)
';