%This is a sample code to show how to simulate a discrete time Markov
%chain with a given transition probability matrix. It also plots an
%estimate of a value of the stationary distribution by averaging over a
%very long path.
%The inputs:
% n = the number of steps to take.
%The output:
% value = value of the estimate.
%
% More importantly, the code produces a plot of the running average.
% You can convert the plot to
% a .pdf to turn it in.
%
%How to call this function: put this script into the folder in which
%Matlab is open. Then type the following into Matlab:
% [value] = DTMC_SimulationAverage(10000);
%Note that my transition matrix is *not* the one you will need to use. In
%fact, my state space is {1,2,3}, whereas your state space is {1,2,3,4}.
function [value] = DTMC_SimulationAverage(n)
%DTMC_SimulationAverage  Simulate n steps of a discrete-time Markov chain
%and estimate one entry of its stationary distribution by averaging the
%visits to a target state over a long path.
%
%Input:
%   n     = number of transition steps to simulate.
%Output:
%   value = running-average estimate after n steps, i.e.
%           (visits to the target state during steps 1..n) / n.
%
%The function also plots the running average against the step count.
%
%NOTE: the transition matrix P below is a 3-state example; replace it with
%your own. The sampling code adapts to a state space of any size.

%Transition matrix of the example chain (each row sums to 1). Your
%transition matrix is different; the rest of the code works for any size.
P = [ 1/3 2/3 0;
      1/4 1/2 1/4;
      1   0   0];
%State whose long-run visit frequency we estimate (state 3 here).
targetState = 3;

%Row-wise cumulative probabilities: cumP(i,k) = P(i,1)+...+P(i,k).
%Sampling the next state then reduces to finding the first column whose
%cumulative probability exceeds a uniform random draw (inverse-CDF method).
cumP = cumsum(P, 2);

%X holds the entire realization of the chain: X(1) is the initial state,
%X(j+1) is the state after j steps.
X = zeros([n+1,1]);
%Set the initial condition: the chain starts in state 1.
X(1) = 1;
%count(j+1) = number of visits to targetState during steps 1..j.
count = zeros([n+1,1]);
%Main loop: advance the chain one step at a time.
for j = 1:n
    %Generate a uniform random variable and sample the next state.
    r = rand;
    X(j+1) = find(r < cumP(X(j),:), 1);
    %Carry the visit count forward, adding 1 on a visit to targetState.
    count(j+1) = count(j) + (X(j+1) == targetState);
end

%Running average: after k steps we have k observations X(2..k+1), so the
%visit count is divided by k (not k+1 -- the initial state is not an
%observation). freq(1) stays 0 as the zero-step placeholder.
freq = zeros([n+1,1]);
freq(2:n+1) = count(2:n+1) ./ (1:n)';

%Plot the running average against the number of steps 0,1,...,n.
Tn = (0:n)';
figure
plot(Tn, freq)

%Output the estimate after all n steps.
value = freq(n+1);
end